"git2-curl 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
"hamcrest 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jobserver 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)",
"libgit2-sys 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)",
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+[[package]]
+name = "jobserver"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
[[package]]
name = "kernel32-sys"
version = "0.2.2"
"checksum hamcrest 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bf088f042a467089e9baa4972f57f9247e42a0cc549ba264c7a04fbb8ecb89d4"
"checksum idna 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2233d4940b1f19f0418c158509cd7396b8d70a5db5705ce410914dc8fa603b37"
"checksum itoa 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "eb2f404fbc66fd9aac13e998248505e7ecb2ad8e44ab6388684c5fb11c6c251c"
+"checksum jobserver 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3d97367c7112f0a46029cdf826c442bb71061789b0dcd1f0e71ce56ce3ee7a6a"
"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
"checksum lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3b37545ab726dd833ec6420aaba8231c5b320814b9029ad585555d2a03e94fbf"
"checksum libc 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)" = "e7eb6b826bfc1fdea7935d46556250d1799b7fe2d9f7951071f4291710665e3e"
git2 = "0.6"
git2-curl = "0.7"
glob = "0.2"
+jobserver = "0.1.2"
libc = "0.2"
libgit2-sys = "0.6"
log = "0.3"
semver = { version = "0.7.0", features = ["serde"] }
serde = "1.0"
serde_derive = "1.0"
-serde_json = "1.0"
serde_ignored = "0.0.3"
+serde_json = "1.0"
shell-escape = "0.1"
tar = { version = "0.4", default-features = false }
tempdir = "0.3"
extern crate fs2;
extern crate git2;
extern crate glob;
+extern crate jobserver;
extern crate libc;
extern crate libgit2_sys;
extern crate num_cpus;
host_triple: host_triple,
requested_target: opts.target.map(|s| s.to_owned()),
release: opts.release,
+ jobs: 1,
..BuildConfig::default()
},
profiles)?;
jobs: Option<u32>,
target: Option<String>)
-> CargoResult<ops::BuildConfig> {
+ if jobs.is_some() && config.jobserver_from_env().is_some() {
+ config.shell().warn("a `-j` argument was passed to Cargo but Cargo is \
+ also configured with an external jobserver in \
+ its environment, ignoring the `-j` parameter")?;
+ }
let cfg_jobs = match config.get_i64("build.jobs")? {
Some(v) => {
if v.val <= 0 {
use std::str::{self, FromStr};
use std::sync::Arc;
+use jobserver::Client;
+
use core::{Package, PackageId, PackageSet, Resolve, Target, Profile};
use core::{TargetKind, Profiles, Dependency, Workspace};
use core::dependency::Kind as DepKind;
pub build_scripts: HashMap<Unit<'a>, Arc<BuildScripts>>,
pub links: Links<'a>,
pub used_in_plugin: HashSet<Unit<'a>>,
+ pub jobserver: Client,
host: Layout,
target: Option<Layout>,
config.rustc()?.verbose_version.contains("-dev");
let incremental_enabled = incremental_enabled && is_nightly;
+ // Load up the jobserver that we'll use to manage our parallelism. This
+ // is the same as the GNU make implementation of a jobserver, and
+ // intentionally so! It's hoped that we can interact with GNU make and
+ // all share the same jobserver.
+ //
+ // Note that if we don't have a jobserver in our environment then we
+ // create our own, and we create it with `n-1` tokens because one token
+ // is ourself, a running process.
+ let jobserver = match config.jobserver_from_env() {
+ Some(c) => c.clone(),
+ None => Client::new(build_config.jobs as usize - 1).chain_err(|| {
+ "failed to create jobserver"
+ })?,
+ };
+
Ok(Context {
ws: ws,
host: host_layout,
links: Links::new(),
used_in_plugin: HashSet::new(),
incremental_enabled: incremental_enabled,
+ jobserver: jobserver,
})
}
.env("PROFILE", if cx.build_config.release { "release" } else { "debug" })
.env("HOST", cx.host_triple())
.env("RUSTC", &cx.config.rustc()?.path)
- .env("RUSTDOC", &*cx.config.rustdoc()?);
+ .env("RUSTDOC", &*cx.config.rustdoc()?)
+ .inherit_jobserver(&cx.jobserver);
if let Some(links) = unit.pkg.manifest().links() {
cmd.env("CARGO_MANIFEST_LINKS", links);
use std::collections::HashSet;
use std::collections::hash_map::HashMap;
use std::fmt;
-use std::io::Write;
+use std::io::{self, Write};
+use std::mem;
use std::sync::mpsc::{channel, Sender, Receiver};
use crossbeam::{self, Scope};
+use jobserver::{Acquired, HelperThread};
use term::color::YELLOW;
use core::{PackageId, Target, Profile};
use util::{Config, DependencyQueue, Fresh, Dirty, Freshness};
-use util::{CargoResult, ProcessBuilder, profile, internal};
+use util::{CargoResult, ProcessBuilder, profile, internal, CargoResultExt};
use {handle_error};
use super::{Context, Kind, Unit};
/// actual compilation step of each package. Packages enqueue units of work and
/// then later on the entire graph is processed and compiled.
pub struct JobQueue<'a> {
- jobs: usize,
queue: DependencyQueue<Key<'a>, Vec<(Job, Freshness)>>,
- tx: Sender<(Key<'a>, Message)>,
- rx: Receiver<(Key<'a>, Message)>,
+ tx: Sender<Message<'a>>,
+ rx: Receiver<Message<'a>>,
active: usize,
pending: HashMap<Key<'a>, PendingBuild>,
compiled: HashSet<&'a PackageId>,
}
pub struct JobState<'a> {
- tx: Sender<(Key<'a>, Message)>,
- key: Key<'a>,
+ tx: Sender<Message<'a>>,
}
-enum Message {
+enum Message<'a> {
Run(String),
Stdout(String),
Stderr(String),
- Finish(CargoResult<()>),
+ Token(io::Result<Acquired>),
+ Finish(Key<'a>, CargoResult<()>),
}
impl<'a> JobState<'a> {
pub fn running(&self, cmd: &ProcessBuilder) {
- let _ = self.tx.send((self.key, Message::Run(cmd.to_string())));
+ let _ = self.tx.send(Message::Run(cmd.to_string()));
}
pub fn stdout(&self, out: &str) {
- let _ = self.tx.send((self.key, Message::Stdout(out.to_string())));
+ let _ = self.tx.send(Message::Stdout(out.to_string()));
}
pub fn stderr(&self, err: &str) {
- let _ = self.tx.send((self.key, Message::Stderr(err.to_string())));
+ let _ = self.tx.send(Message::Stderr(err.to_string()));
}
}
pub fn new<'cfg>(cx: &Context<'a, 'cfg>) -> JobQueue<'a> {
let (tx, rx) = channel();
JobQueue {
- jobs: cx.jobs() as usize,
queue: DependencyQueue::new(),
tx: tx,
rx: rx,
pub fn execute(&mut self, cx: &mut Context) -> CargoResult<()> {
let _p = profile::start("executing the job graph");
+ // We need to give a handle to the send half of our message queue to the
+ // jobserver helper thread. Unfortunately though we need the handle to be
+ // `'static` as that's typically what's required when spawning a
+ // thread!
+ //
+ // To work around this we transmute the `Sender` to a static lifetime.
+ // We're only sending "longer living" messages and we should also
+ // destroy all references to the channel before this function exits as
+ // the destructor for the `helper` object will ensure the associated
+ // thread is no longer running.
+ //
+ // As a result, this `transmute` to a longer lifetime should be safe in
+ // practice.
+ let tx = self.tx.clone();
+ let tx = unsafe {
+ mem::transmute::<Sender<Message<'a>>, Sender<Message<'static>>>(tx)
+ };
+ let helper = cx.jobserver.clone().into_helper_thread(move |token| {
+ drop(tx.send(Message::Token(token)));
+ }).chain_err(|| {
+ "failed to create helper thread for jobserver management"
+ })?;
+
crossbeam::scope(|scope| {
- self.drain_the_queue(cx, scope)
+ self.drain_the_queue(cx, scope, &helper)
})
}
- fn drain_the_queue(&mut self, cx: &mut Context, scope: &Scope<'a>)
+ fn drain_the_queue(&mut self,
+ cx: &mut Context,
+ scope: &Scope<'a>,
+ jobserver_helper: &HelperThread)
-> CargoResult<()> {
use std::time::Instant;
+ let mut tokens = Vec::new();
let mut queue = Vec::new();
trace!("queue: {:#?}", self.queue);
// Iteratively execute the entire dependency graph. Each turn of the
// loop starts out by scheduling as much work as possible (up to the
- // maximum number of parallel jobs). A local queue is maintained
- // separately from the main dependency queue as one dequeue may actually
- // dequeue quite a bit of work (e.g. 10 binaries in one project).
+ // maximum number of parallel jobs we have tokens for). A local queue
+ // is maintained separately from the main dependency queue as one
+ // dequeue may actually dequeue quite a bit of work (e.g. 10 binaries
+ // in one project).
//
// After a job has finished we update our internal state if it was
// successful and otherwise wait for pending work to finish if it failed
let mut error = None;
let start_time = Instant::now();
loop {
- while error.is_none() && self.active < self.jobs {
- if !queue.is_empty() {
- let (key, job, fresh) = queue.remove(0);
- self.run(key, fresh, job, cx.config, scope)?;
- } else if let Some((fresh, key, jobs)) = self.queue.dequeue() {
- let total_fresh = jobs.iter().fold(fresh, |fresh, &(_, f)| {
- f.combine(fresh)
- });
- self.pending.insert(key, PendingBuild {
- amt: jobs.len(),
- fresh: total_fresh,
- });
- queue.extend(jobs.into_iter().map(|(job, f)| {
- (key, job, f.combine(fresh))
- }));
- } else {
- break
+ // Dequeue as much work as we can, learning about everything
+ // possible that can run. Note that this is also the point where we
+ // start requesting job tokens. Each job after the first needs to
+ // request a token.
+ while let Some((fresh, key, jobs)) = self.queue.dequeue() {
+ let total_fresh = jobs.iter().fold(fresh, |fresh, &(_, f)| {
+ f.combine(fresh)
+ });
+ self.pending.insert(key, PendingBuild {
+ amt: jobs.len(),
+ fresh: total_fresh,
+ });
+ for (job, f) in jobs {
+ queue.push((key, job, f.combine(fresh)));
+ if self.active + queue.len() > 1 {
+ jobserver_helper.request_token();
+ }
}
}
+
+ // Now that we've learned of all possible work that we can execute
+ // try to spawn it so long as we've got a jobserver token which says
+ // we're able to perform some parallel work.
+ while error.is_none() && self.active < tokens.len() + 1 && !queue.is_empty() {
+ let (key, job, fresh) = queue.remove(0);
+ self.run(key, fresh, job, cx.config, scope)?;
+ }
+
+ // If after all that we're not actually running anything then we're
+ // done!
if self.active == 0 {
break
}
- let (key, msg) = self.rx.recv().unwrap();
+ // And finally, before we block waiting for the next event, drop any
+ // excess tokens we may have accidentally acquired. Due to how our
+ // jobserver interface is architected we may acquire a token that we
+ // don't actually use, and if this happens just relinquish it back
+ // to the jobserver itself.
+ tokens.truncate(self.active - 1);
- match msg {
+ match self.rx.recv().unwrap() {
Message::Run(cmd) => {
cx.config.shell().verbose(|c| c.status("Running", &cmd))?;
}
writeln!(cx.config.shell().err(), "{}", err)?;
}
}
- Message::Finish(result) => {
+ Message::Finish(key, result) => {
info!("end: {:?}", key);
self.active -= 1;
+ if self.active > 0 {
+ assert!(tokens.len() > 0);
+ drop(tokens.pop());
+ }
match result {
Ok(()) => self.finish(key, cx)?,
Err(e) => {
}
}
}
+ Message::Token(acquired_token) => {
+ tokens.push(acquired_token.chain_err(|| {
+ "failed to acquire jobserver token"
+ })?);
+ }
}
}
scope.spawn(move || {
let res = job.run(fresh, &JobState {
tx: my_tx.clone(),
- key: key,
});
- my_tx.send((key, Message::Finish(res))).unwrap();
+ my_tx.send(Message::Finish(key, res)).unwrap();
});
// Print out some nice progress information
/// the build calls.
pub trait Executor: Send + Sync + 'static {
fn init(&self, _cx: &Context) {}
+
/// If execution succeeds, the ContinueBuild value indicates whether Cargo
/// should continue with the build process for this package.
fn exec(&self, cmd: ProcessBuilder, _id: &PackageId) -> CargoResult<()> {
crate_types: Vec<&str>,
unit: &Unit) -> CargoResult<ProcessBuilder> {
let mut base = cx.compilation.rustc_process(unit.pkg)?;
+ base.inherit_jobserver(&cx.jobserver);
build_base_args(cx, &mut base, unit, &crate_types);
build_deps_args(&mut base, cx, unit)?;
Ok(base)
fn rustdoc(cx: &mut Context, unit: &Unit) -> CargoResult<Work> {
let mut rustdoc = cx.compilation.rustdoc_process(unit.pkg)?;
+ rustdoc.inherit_jobserver(&cx.jobserver);
rustdoc.arg("--crate-name").arg(&unit.target.crate_name())
.cwd(cx.config.cwd())
.arg(&root_path(cx, unit));
use std::cell::{RefCell, RefMut, Cell};
+use std::collections::HashSet;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::collections::hash_map::HashMap;
-use std::collections::HashSet;
use std::env;
use std::fmt;
use std::fs::{self, File};
use std::mem;
use std::path::{Path, PathBuf};
use std::str::FromStr;
+use std::sync::{Once, ONCE_INIT};
+use core::MultiShell;
+use core::shell::{Verbosity, ColorConfig};
+use jobserver;
use rustc_serialize::{Encodable,Encoder};
use toml;
-use core::shell::{Verbosity, ColorConfig};
-use core::MultiShell;
use util::Rustc;
use util::errors::{CargoResult, CargoResultExt, CargoError, internal};
-use util::{Filesystem, LazyCell};
use util::paths;
+use util::{Filesystem, LazyCell};
use util::toml as cargo_toml;
extra_verbose: Cell<bool>,
frozen: Cell<bool>,
locked: Cell<bool>,
+ jobserver: Option<jobserver::Client>,
}
impl Config {
pub fn new(shell: MultiShell,
cwd: PathBuf,
homedir: PathBuf) -> Config {
+ static mut GLOBAL_JOBSERVER: *mut jobserver::Client = 0 as *mut _;
+ static INIT: Once = ONCE_INIT;
+
+ // This should be called early on in the process, so in theory the
+ // unsafety is ok here. (taken ownership of random fds)
+ INIT.call_once(|| unsafe {
+ if let Some(client) = jobserver::Client::from_env() {
+ GLOBAL_JOBSERVER = Box::into_raw(Box::new(client));
+ }
+ });
+
Config {
home_path: Filesystem::new(homedir),
shell: RefCell::new(shell),
extra_verbose: Cell::new(false),
frozen: Cell::new(false),
locked: Cell::new(false),
+ jobserver: unsafe {
+ if GLOBAL_JOBSERVER.is_null() {
+ None
+ } else {
+ Some((*GLOBAL_JOBSERVER).clone())
+ }
+ },
}
}
self.maybe_get_tool(tool)
.map(|t| t.unwrap_or(PathBuf::from(tool)))
}
+
+ pub fn jobserver_from_env(&self) -> Option<&jobserver::Client> {
+ self.jobserver.as_ref()
+ }
}
#[derive(Eq, PartialEq, Clone, Copy)]
use std::path::Path;
use std::process::{Command, Stdio, Output};
+use jobserver::Client;
+use shell_escape::escape;
+
use util::{CargoResult, CargoResultExt, CargoError, process_error, read2};
use util::errors::CargoErrorKind;
-use shell_escape::escape;
-#[derive(Clone, PartialEq, Debug)]
+#[derive(Clone, Debug)]
pub struct ProcessBuilder {
program: OsString,
args: Vec<OsString>,
env: HashMap<String, Option<OsString>>,
cwd: Option<OsString>,
+ jobserver: Option<Client>,
}
impl fmt::Display for ProcessBuilder {
pub fn get_envs(&self) -> &HashMap<String, Option<OsString>> { &self.env }
+ pub fn inherit_jobserver(&mut self, jobserver: &Client) -> &mut Self {
+ self.jobserver = Some(jobserver.clone());
+ self
+ }
+
pub fn exec(&self) -> CargoResult<()> {
let mut command = self.build_command();
let exit = command.status().chain_err(|| {
None => { command.env_remove(k); }
}
}
+ if let Some(ref c) = self.jobserver {
+ c.configure(&mut command);
+ }
command
}
args: Vec::new(),
cwd: None,
env: HashMap::new(),
+ jobserver: None,
}
}
triples can be found in [clang’s own documentation][clang].
* `HOST` - the host triple of the rust compiler.
* `NUM_JOBS` - the parallelism specified as the top-level parallelism. This can
- be useful to pass a `-j` parameter to a system like `make`.
+ be useful to pass a `-j` parameter to a system like `make`. Note
+ that care should be taken when interpreting this environment
+ variable. For historical purposes this is still provided but
+ recent versions of Cargo, for example, do not need to run `make
+ -j` as it'll automatically happen. Cargo implements its own
+ [jobserver] and will allow build scripts to inherit this
+ information, so programs compatible with GNU make jobservers will
+ already have appropriately configured parallelism.
* `OPT_LEVEL`, `DEBUG` - values of the corresponding variables for the
profile currently being built.
* `PROFILE` - `release` for release builds, `debug` for other builds.
[links]: build-script.html#the-links-manifest-key
[profile]: manifest.html#the-profile-sections
[clang]:http://clang.llvm.org/docs/CrossCompilation.html#target-triple
+[jobserver]: http://make.mad-scientist.net/papers/jobserver-implementation/
# Environment variables Cargo sets for 3rd party subcommands
.env_remove("XDG_CONFIG_HOME") // see #2345
.env("GIT_CONFIG_NOSYSTEM", "1") // keep trying to sandbox ourselves
.env_remove("EMAIL")
+ .env_remove("MFLAGS")
+ .env_remove("MAKEFLAGS")
.env_remove("GIT_AUTHOR_NAME")
.env_remove("GIT_AUTHOR_EMAIL")
.env_remove("GIT_COMMITTER_NAME")
--- /dev/null
+extern crate cargotest;
+extern crate hamcrest;
+extern crate cargo;
+
+use std::str;
+use std::net::TcpListener;
+use std::thread;
+use std::process::Command;
+
+use cargotest::support::{project, execs, cargo_exe};
+use hamcrest::assert_that;
+
+#[test]
+fn jobserver_exists() {
+ let p = project("foo")
+ .file("Cargo.toml", r#"
+ [package]
+ name = "foo"
+ version = "0.0.1"
+ authors = []
+ "#)
+ .file("build.rs", r#"
+ use std::env;
+
+ fn main() {
+ let var = env::var("MAKEFLAGS").unwrap();
+ let arg = var.split(' ')
+ .find(|p| p.starts_with("--jobserver"))
+ .unwrap();
+ let val = &arg[arg.find('=').unwrap() + 1..];
+ validate(val);
+ }
+
+ #[cfg(unix)]
+ fn validate(s: &str) {
+ use std::fs::File;
+ use std::io::*;
+ use std::os::unix::prelude::*;
+
+ let fds = s.split(',').collect::<Vec<_>>();
+ println!("{}", s);
+ assert_eq!(fds.len(), 2);
+ unsafe {
+ let mut read = File::from_raw_fd(fds[0].parse().unwrap());
+ let mut write = File::from_raw_fd(fds[1].parse().unwrap());
+
+ let mut buf = [0];
+ assert_eq!(read.read(&mut buf).unwrap(), 1);
+ assert_eq!(write.write(&buf).unwrap(), 1);
+ }
+ }
+
+ #[cfg(windows)]
+ fn validate(_: &str) {
+ // a little too complicated for a test...
+ }
+ "#)
+ .file("src/lib.rs", "");
+
+ assert_that(p.cargo_process("build"),
+ execs().with_status(0));
+}
+
+#[test]
+fn makes_jobserver_used() {
+ let make = if cfg!(windows) {"mingw32-make"} else {"make"};
+ if Command::new(make).arg("--version").output().is_err() {
+ return
+ }
+
+ let p = project("foo")
+ .file("Cargo.toml", r#"
+ [package]
+ name = "foo"
+ version = "0.0.1"
+ authors = []
+
+ [dependencies]
+ d1 = { path = "d1" }
+ d2 = { path = "d2" }
+ d3 = { path = "d3" }
+ "#)
+ .file("src/lib.rs", "")
+ .file("d1/Cargo.toml", r#"
+ [package]
+ name = "d1"
+ version = "0.0.1"
+ authors = []
+ build = "../dbuild.rs"
+ "#)
+ .file("d1/src/lib.rs", "")
+ .file("d2/Cargo.toml", r#"
+ [package]
+ name = "d2"
+ version = "0.0.1"
+ authors = []
+ build = "../dbuild.rs"
+ "#)
+ .file("d2/src/lib.rs", "")
+ .file("d3/Cargo.toml", r#"
+ [package]
+ name = "d3"
+ version = "0.0.1"
+ authors = []
+ build = "../dbuild.rs"
+ "#)
+ .file("d3/src/lib.rs", "")
+ .file("dbuild.rs", r#"
+ use std::net::TcpStream;
+ use std::env;
+ use std::io::Read;
+
+ fn main() {
+ let addr = env::var("ADDR").unwrap();
+ let mut stream = TcpStream::connect(addr).unwrap();
+ let mut v = Vec::new();
+ stream.read_to_end(&mut v).unwrap();
+ }
+ "#)
+ .file("Makefile", "\
+all:
+\t+$(CARGO) build
+");
+ p.build();
+
+ let l = TcpListener::bind("127.0.0.1:0").unwrap();
+ let addr = l.local_addr().unwrap();
+
+ let child = thread::spawn(move || {
+ let a1 = l.accept().unwrap();
+ let a2 = l.accept().unwrap();
+ l.set_nonblocking(true).unwrap();
+
+ for _ in 0..1000 {
+ assert!(l.accept().is_err());
+ thread::yield_now();
+ }
+
+ drop(a1);
+ l.set_nonblocking(false).unwrap();
+ let a3 = l.accept().unwrap();
+
+ drop((a2, a3));
+ });
+
+ assert_that(p.process(make)
+ .env("CARGO", cargo_exe())
+ .env("ADDR", addr.to_string())
+ .arg("-j2"),
+ execs().with_status(0));
+ child.join().unwrap();
+}
+
+#[test]
+fn jobserver_and_j() {
+ let make = if cfg!(windows) {"mingw32-make"} else {"make"};
+ if Command::new(make).arg("--version").output().is_err() {
+ return
+ }
+
+ let p = project("foo")
+ .file("Cargo.toml", r#"
+ [package]
+ name = "foo"
+ version = "0.0.1"
+ authors = []
+ "#)
+ .file("src/lib.rs", "")
+ .file("Makefile", "\
+all:
+\t+$(CARGO) build -j2
+");
+ p.build();
+
+ assert_that(p.process(make)
+ .env("CARGO", cargo_exe())
+ .arg("-j2"),
+ execs().with_status(0).with_stderr("\
+warning: a `-j` argument was passed to Cargo but Cargo is also configured \
+with an external jobserver in its environment, ignoring the `-j` parameter
+[COMPILING] [..]
+[FINISHED] [..]
+"));
+}